From: Keir Fraser
Date: Fri, 9 Oct 2009 08:34:03 +0000 (+0100)
Subject: x86: trust new architecturally-defined TSC Invariant bit on Intel systems
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~13239
X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22?a=commitdiff_plain;h=f98ee759c5e0209ae78dbf416df792339ced9e2e;p=xen.git

x86: trust new architecturally-defined TSC Invariant bit on Intel systems

Trust new architecturally-defined TSC Invariant bit (on Intel systems
only for now, AMD TBD).

Signed-off-by: Dan Magenheimer
Signed-off-by: Keir Fraser
---

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 80a57d6dcf..608b67a320 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -66,6 +66,10 @@ integer_param("maxcpus", max_cpus);
 static int opt_watchdog = 0;
 boolean_param("watchdog", opt_watchdog);
 
+/* opt_tsc_unstable: Override all tests; assume TSC is unreliable. */
+static int opt_tsc_unstable;
+boolean_param("tsc_unstable", opt_tsc_unstable);
+
 /* **** Linux config option: propagated to domain0. */
 /* "acpi=off": Disables both ACPI table parsing and interpreter. */
 /* "acpi=force": Override the disable blacklist. */
@@ -461,6 +465,14 @@ void __init __start_xen(unsigned long mbi_p)
     }
     cmdline_parse(cmdline);
 
+    /* If TSC is marked as unstable, clear all enhanced TSC features. */
+    if ( opt_tsc_unstable )
+    {
+        setup_clear_cpu_cap(X86_FEATURE_CONSTANT_TSC);
+        setup_clear_cpu_cap(X86_FEATURE_NONSTOP_TSC);
+        setup_clear_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+    }
+
     parse_video_info();
 
     set_current((struct vcpu *)0xfffff000); /* debug sanity */
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 7ba6813da1..2a64bd5f27 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -187,6 +187,11 @@ static void __init synchronize_tsc_bp (void)
 	unsigned int one_usec;
 	int buggy = 0;
 
+	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
+		printk("TSC is reliable, synchronization unnecessary\n");
+		return;
+	}
+
 	printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());
 
 	/* convert from kcyc/sec to cyc/usec */
@@ -279,6 +284,9 @@ static void __init synchronize_tsc_ap (void)
 {
 	int i;
 
+	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+		return;
+
 	/*
 	 * Not every cpu is online at the time
 	 * this gets called, so we first wait for the BP to
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 7f1a1153d7..4a3b64fe92 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -37,13 +37,6 @@
 static char __initdata opt_clocksource[10];
 string_param("clocksource", opt_clocksource);
 
-/*
- * opt_consistent_tscs: All TSCs tick at the exact same rate, allowing
- * simplified system time handling.
- */
-static int opt_consistent_tscs;
-boolean_param("consistent_tscs", opt_consistent_tscs);
-
 unsigned long cpu_khz;  /* CPU clock frequency in kHz. */
 DEFINE_SPINLOCK(rtc_lock);
 unsigned long pit0_ticks;
@@ -977,7 +970,7 @@ static void local_time_calibration(void)
     /* The overall calibration scale multiplier. */
     u32 calibration_mul_frac;
 
-    if ( opt_consistent_tscs )
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
     {
         /* Atomically read cpu_calibration struct and write cpu_time struct. */
         local_irq_disable();
@@ -1110,6 +1103,10 @@ struct calibration_rendezvous {
     u64 master_tsc_stamp;
 };
 
+/*
+ * Keep TSCs in sync when they run at the same rate, but may stop in
+ * deep-sleep C states.
+ */
 static void time_calibration_tsc_rendezvous(void *_r)
 {
     int i;
@@ -1161,6 +1158,7 @@ static void time_calibration_tsc_rendezvous(void *_r)
     raise_softirq(TIME_CALIBRATE_SOFTIRQ);
 }
 
+/* Ordinary rendezvous function which does not modify TSC values. */
 static void time_calibration_std_rendezvous(void *_r)
 {
     struct cpu_calibration *c = &this_cpu(cpu_calibration);
@@ -1190,6 +1188,9 @@ static void time_calibration_std_rendezvous(void *_r)
     raise_softirq(TIME_CALIBRATE_SOFTIRQ);
 }
 
+static void (*time_calibration_rendezvous_fn)(void *) =
+    time_calibration_std_rendezvous;
+
 static void time_calibration(void *unused)
 {
     struct calibration_rendezvous r = {
@@ -1199,9 +1200,7 @@
 
     /* @wait=1 because we must wait for all cpus before freeing @r. */
     on_selected_cpus(&r.cpu_calibration_map,
-                     opt_consistent_tscs
-                     ? time_calibration_tsc_rendezvous
-                     : time_calibration_std_rendezvous,
+                     time_calibration_rendezvous_fn,
                      &r, 1);
 }
@@ -1229,15 +1228,15 @@ void init_percpu_time(void)
 /* Late init function (after all CPUs are booted). */
 int __init init_xen_time(void)
 {
-    if ( !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
-        opt_consistent_tscs = 0;
-
-    /* If we have constant TSCs then scale factor can be shared. */
-    if ( opt_consistent_tscs )
+    /* If we have constant-rate TSCs then scale factor can be shared. */
+    if ( boot_cpu_has(X86_FEATURE_CONSTANT_TSC) )
     {
         int cpu;
         for_each_possible_cpu ( cpu )
             per_cpu(cpu_time, cpu).tsc_scale = per_cpu(cpu_time, 0).tsc_scale;
+        /* If TSCs are not marked as 'reliable', re-sync during rendezvous. */
+        if ( !boot_cpu_has(X86_FEATURE_TSC_RELIABLE) )
+            time_calibration_rendezvous_fn = time_calibration_tsc_rendezvous;
     }
 
     open_softirq(TIME_CALIBRATE_SOFTIRQ, local_time_calibration);